Merge pull request #1197 from dp-arm/dp/amu
author    davidcunado-arm <[email protected]>
Fri, 12 Jan 2018 09:02:24 +0000 (09:02 +0000)
committer GitHub <[email protected]>
Fri, 12 Jan 2018 09:02:24 +0000 (09:02 +0000)
AMUv1 support

bl31/bl31.mk
include/lib/aarch64/arch.h
lib/cpus/aarch64/cortex_a75.S
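
Of the three files touched, bl31/bl31.mk and include/lib/aarch64/arch.h resolve as simple merges; the substantive change is in lib/cpus/aarch64/cortex_a75.S, which exports a set of Cortex-A75 AMUv1 accessors. Collected from the function comments in the diff below, the C-level interface amounts to the following (a reference sketch only, not a header added verbatim by this merge):

    #include <stdint.h>

    /* Cortex-A75 AMUv1 accessors exported by cortex_a75.S; prototypes
     * are taken from the function comments in the diff below. */
    uint64_t cortex_a75_amu_cnt_read(int idx);
    void cortex_a75_amu_cnt_write(int idx, uint64_t val);
    unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
    unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
    void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
    void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
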

diff --cc bl31/bl31.mk
Simple merge
diff --cc include/lib/aarch64/arch.h
Simple merge
diff --cc lib/cpus/aarch64/cortex_a75.S
index 9b54b48f7d54d12504ea285319533929d53c7441,7bd20202313333c3de5e32fc5bbb5a6a8fd6667a..e66ad06670eb65b4df676164a67e0b0b2c706f1f
  #include <plat_macros.S>
  #include <cortex_a75.h>
  
+       .globl  cortex_a75_amu_cnt_read
+       .globl  cortex_a75_amu_cnt_write
+       .globl  cortex_a75_amu_read_cpuamcntenset_el0
+       .globl  cortex_a75_amu_read_cpuamcntenclr_el0
+       .globl  cortex_a75_amu_write_cpuamcntenset_el0
+       .globl  cortex_a75_amu_write_cpuamcntenclr_el0
+ /*
+  * uint64_t cortex_a75_amu_cnt_read(int idx);
+  *
+  * Given `idx`, read the corresponding AMU counter
+  * and return it in `x0`.
+  */
+ func cortex_a75_amu_cnt_read
+       adr     x1, 1f
+       lsl     x0, x0, #3
+       add     x1, x1, x0
+       br      x1
+ 1:
+       mrs     x0, CPUAMEVCNTR0_EL0
+       ret
+       mrs     x0, CPUAMEVCNTR1_EL0
+       ret
+       mrs     x0, CPUAMEVCNTR2_EL0
+       ret
+       mrs     x0, CPUAMEVCNTR3_EL0
+       ret
+       mrs     x0, CPUAMEVCNTR4_EL0
+       ret
+ endfunc cortex_a75_amu_cnt_read
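
The read helper dispatches with a computed branch: idx is scaled by 8 (lsl x0, x0, #3) because each mrs/ret pair in the table at label 1: occupies two 4-byte instructions, so br x1 lands on the pair for the requested counter. In C terms the dispatch is equivalent to the sketch below; the read_cpuamevcntrN_el0() helpers are hypothetical stand-ins for the individual mrs instructions, and idx is assumed to be in the range 0..4:

    #include <stdint.h>

    /* C-level equivalent of the computed-branch dispatch above.
     * read_cpuamevcntrN_el0() are hypothetical wrappers around the
     * corresponding "mrs x0, CPUAMEVCNTRn_EL0" instructions. */
    static uint64_t a75_amu_cnt_read_sketch(int idx)
    {
        switch (idx) {
        case 0: return read_cpuamevcntr0_el0();
        case 1: return read_cpuamevcntr1_el0();
        case 2: return read_cpuamevcntr2_el0();
        case 3: return read_cpuamevcntr3_el0();
        case 4: return read_cpuamevcntr4_el0();
        default: return 0;  /* sketch only; the assembly has no bounds check */
        }
    }
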
+ /*
+  * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+  *
+  * Given `idx`, write `val` to the corresponding AMU counter.
+  */
+ func cortex_a75_amu_cnt_write
+       adr     x2, 1f
+       lsl     x0, x0, #3
+       add     x2, x2, x0
+       br      x2
+ 1:
+       msr     CPUAMEVCNTR0_EL0, x1
+       ret
+       msr     CPUAMEVCNTR1_EL0, x1
+       ret
+       msr     CPUAMEVCNTR2_EL0, x1
+       ret
+       msr     CPUAMEVCNTR3_EL0, x1
+       ret
+       msr     CPUAMEVCNTR4_EL0, x1
+       ret
+ endfunc cortex_a75_amu_cnt_write
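
The write variant follows the same pattern but uses x2 as the scratch register for the table address: under the AAPCS64, x0 carries idx (which is scaled and consumed by the branch) and x1 carries val, which is the value the msr instructions in the table actually write.
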
+ /*
+  * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+  *
+  * Read the `CPUAMCNTENSET_EL0` CPU register and return
+  * it in `x0`.
+  */
+ func cortex_a75_amu_read_cpuamcntenset_el0
+       mrs     x0, CPUAMCNTENSET_EL0
+       ret
+ endfunc cortex_a75_amu_read_cpuamcntenset_el0
+ /*
+  * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+  *
+  * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+  * it in `x0`.
+  */
+ func cortex_a75_amu_read_cpuamcntenclr_el0
+       mrs     x0, CPUAMCNTENCLR_EL0
+       ret
+ endfunc cortex_a75_amu_read_cpuamcntenclr_el0
+ /*
+  * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+  *
+  * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+  */
+ func cortex_a75_amu_write_cpuamcntenset_el0
+       msr     CPUAMCNTENSET_EL0, x0
+       ret
+ endfunc cortex_a75_amu_write_cpuamcntenset_el0
+ /*
+  * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+  *
+  * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+  */
+ func cortex_a75_amu_write_cpuamcntenclr_el0
+       msr     CPUAMCNTENCLR_EL0, x0
+       ret
+ endfunc cortex_a75_amu_write_cpuamcntenclr_el0
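
The set/clear pair follows the usual ARM enable-register convention: each bit in the mask selects one counter, writing a 1 through CPUAMCNTENSET_EL0 enables that counter and writing a 1 through CPUAMCNTENCLR_EL0 disables it, while zero bits leave the other counters untouched. A minimal usage sketch from C, assuming counter 0 is the event of interest (the mask value and the delta-sampling idea are illustrative, not part of this merge):

    #include <stdint.h>

    /* Prototypes as documented in the comments above. */
    uint64_t cortex_a75_amu_cnt_read(int idx);
    void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
    void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);

    /* Enable counter 0, sample a delta around a workload, then
     * disable the counter again. */
    static uint64_t sample_counter0(void)
    {
        uint64_t start, end;

        cortex_a75_amu_write_cpuamcntenset_el0(1U << 0);
        start = cortex_a75_amu_cnt_read(0);
        /* ... workload of interest ... */
        end = cortex_a75_amu_cnt_read(0);
        cortex_a75_amu_write_cpuamcntenclr_el0(1U << 0);

        return end - start;
    }
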
  func cortex_a75_reset_func
 +#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 +      mrs     x0, id_aa64pfr0_el1
 +      ubfx    x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
 +      /*
 +       * If the field equals 1, then branch targets trained in one context
 +       * cannot affect speculative execution in a different context.
 +       */
 +      cmp     x0, #1
 +      beq     1f
 +
 +      adr     x0, workaround_bpiall_vbar0_runtime_exceptions
 +      msr     vbar_el3, x0
 +1:
 +#endif
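
For context: CVE-2017-5715 is the branch-target-injection (Spectre variant 2) issue. ID_AA64PFR0_EL1.CSV2 reading 1 means the hardware already guarantees that branch targets trained in one context cannot control speculative execution in another, so the reset code installs the BPIALL-based workaround vector table in VBAR_EL3 only when that field is not 1.
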
 +
  #if ENABLE_AMU
        /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
        mrs     x0, actlr_el3